# Histograms of each physiological signal to inspect value distributions.
signal_columns = ['PPG', 'ECG', 'resp']
filtered_df[signal_columns].hist(bins=50, figsize=(10, 8))
plt.suptitle("Distribuciones de las Variables")
plt.show()
4.6 Detección de valores atípicos
Code
# One boxplot per signal to surface outliers visually.
outlier_columns = ['PPG', 'ECG', 'resp']
filtered_df[outlier_columns].plot(
    kind='box', subplots=True, layout=(1, 3), figsize=(12, 6)
)
plt.suptitle("Boxplots para Identificación de Valores Atípicos")
plt.show()
4.7 Análisis de correlación
Code
# Pairwise correlations between the signals, rendered as an annotated heatmap.
correlation_matrix = filtered_df.corr()
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm')
plt.title("Matriz de Correlación")
plt.show()
4.8 Análisis de frecuencias
Code
# Frequency-domain view of the PPG signal via the FFT.
SAMPLE_PERIOD_S = 0.008  # 8 ms between samples (125 Hz sampling rate)
ppg_freq = np.fft.fft(filtered_df['PPG'].values)
frequencies = np.fft.fftfreq(len(ppg_freq), d=SAMPLE_PERIOD_S)
# Keep only the positive-frequency half of the (symmetric) spectrum.
half = len(frequencies) // 2
plt.figure(figsize=(12, 6))
plt.plot(frequencies[:half], np.abs(ppg_freq)[:half])
plt.title("Análisis de Frecuencia de PPG")
plt.xlabel("Frecuencia (Hz)")
plt.ylabel("Amplitud")
plt.show()
4.9 Reconstruir la señal
Code
# Two stacked, time-aligned panels: PPG on top, respiration below.
fig = make_subplots(
    rows=2,
    cols=1,
    shared_xaxes=True,
    subplot_titles=("PPG Signal", "Respiration Signal"),
    vertical_spacing=0.1,
)
# Add both traces in row order: (column in filtered_df, legend name).
for row, (column, label) in enumerate([("PPG", "PPG"), ("resp", "Resp")], start=1):
    fig.add_trace(
        go.Scatter(x=filtered_df["Time"], y=filtered_df[column], mode="lines", name=label),
        row=row,
        col=1,
    )
fig.update_layout(height=600, width=800, title_text="Señales PPG y Respiración", showlegend=False)
fig.update_xaxes(title_text="Time (s)", row=2, col=1)
fig.update_yaxes(title_text="PPG", row=1, col=1)
fig.update_yaxes(title_text="Resp", row=2, col=1)
fig.show()
Figure 1: Reconstrucción de la señal de uno de los usuarios
5.2 Obtener la fingerprint de la unión de las señales (PPG y respiración)
Code
# Extract the combined PPG+respiration feature vectors ("fingerprints"),
# one per segment pair, computed in parallel at sampling rate FS.
# NOTE(review): module name "extact_features" looks like a typo for
# "extract_features" — confirm against the utils package on disk.
from utils.extact_features import compute_features_parallel

combined_features = compute_features_parallel(segments_ppg, segments_resp, FS)
# Display the first feature vector and the total number of segments.
combined_features[0], len(combined_features)
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/fromnumeric.py:3596: RuntimeWarning: Mean of empty slice.
return _methods._mean(a, axis=axis, dtype=dtype,
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/_methods.py:138: RuntimeWarning: invalid value encountered in scalar divide
ret = ret.dtype.type(ret / rcount)
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/fromnumeric.py:3596: RuntimeWarning: Mean of empty slice.
return _methods._mean(a, axis=axis, dtype=dtype,
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/_methods.py:138: RuntimeWarning: invalid value encountered in scalar divide
ret = ret.dtype.type(ret / rcount)
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/fromnumeric.py:3596: RuntimeWarning: Mean of empty slice.
return _methods._mean(a, axis=axis, dtype=dtype,
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/_methods.py:138: RuntimeWarning: invalid value encountered in scalar divide
ret = ret.dtype.type(ret / rcount)
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/fromnumeric.py:3596: RuntimeWarning: Mean of empty slice.
return _methods._mean(a, axis=axis, dtype=dtype,
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/_methods.py:138: RuntimeWarning: invalid value encountered in scalar divide
ret = ret.dtype.type(ret / rcount)
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/fromnumeric.py:3596: RuntimeWarning: Mean of empty slice.
return _methods._mean(a, axis=axis, dtype=dtype,
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/_methods.py:138: RuntimeWarning: invalid value encountered in scalar divide
ret = ret.dtype.type(ret / rcount)
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/fromnumeric.py:3596: RuntimeWarning: Mean of empty slice.
return _methods._mean(a, axis=axis, dtype=dtype,
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/_methods.py:138: RuntimeWarning: invalid value encountered in scalar divide
ret = ret.dtype.type(ret / rcount)
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/fromnumeric.py:3596: RuntimeWarning: Mean of empty slice.
return _methods._mean(a, axis=axis, dtype=dtype,
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/_methods.py:138: RuntimeWarning: invalid value encountered in scalar divide
ret = ret.dtype.type(ret / rcount)
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/fromnumeric.py:3596: RuntimeWarning: Mean of empty slice.
return _methods._mean(a, axis=axis, dtype=dtype,
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/numpy/_core/_methods.py:138: RuntimeWarning: invalid value encountered in scalar divide
ret = ret.dtype.type(ret / rcount)
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/model_selection/_split.py:91: UserWarning:
The groups parameter is ignored by RepeatedKFold
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/model_selection/_split.py:91: UserWarning:
The groups parameter is ignored by KFold
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
Probando clasificador: Logistic Regression
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
Probando clasificador: Decision Tree
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
Probando clasificador: Naive Bayes Gaussiano
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
/home/think-crag/.cache/pypoetry/virtualenvs/experimentos-NZxwsTu5-py3.12/lib/python3.12/site-packages/sklearn/metrics/_classification.py:1531: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
Loading ITables v2.2.3 from the internet...
(need help?)
6.1 Gráfico de líneas de precisión de los clasificadores
Code
# One accuracy trace per classifier, plotted across repeated evaluation runs.
fig = go.Figure()
for classifier_name in df_results['model'].unique():
    # Rows belonging to this classifier only.
    model_data = df_results[df_results['model'] == classifier_name]
    fig.add_trace(
        go.Scatter(
            x=model_data.index + 1,  # shift to 1-based "run number" for display
            y=model_data['accuracy'],
            mode="lines+markers",
            name=classifier_name,
        )
    )
fig.update_layout(
    title="Comparación de Accuracy entre Clasificadores",
    xaxis_title="Número de Prueba",
    yaxis_title="Accuracy",
    template="plotly_white",
)
fig.show()
6.2 Comparación con el estado del arte
Code
# Compare our best classifier's accuracy against published state-of-the-art
# results. `mejor_modelo` is the row of df_results with the highest accuracy.
mejor_modelo = df_results.loc[df_results['accuracy'].idxmax()]
models_accuracies = {
    "Pu et al": .9894,
    "Zhao et al static": .96,
    # Fixed label typo: "mivement" -> "movement".
    "Zhao et al movement": .9073,
    "Aly et al": .935,
    "Wu et al": .921,
    "Zhang": .949,
    f"Proposed ({mejor_modelo['model']})": mejor_modelo['accuracy'],
}
# Prepare parallel x/y lists for plotting (dict preserves insertion order).
models = list(models_accuracies.keys())
accuracies = list(models_accuracies.values())
fig = go.Figure(data=[
    go.Scatter(
        x=models,
        y=accuracies,
        mode='markers+lines',
        marker=dict(size=10, color=accuracies, colorscale='Viridis', showscale=True),
        line=dict(dash='solid'),
        name='Accuracy',
    )
])
fig.update_layout(
    title="Model Accuracies",
    xaxis_title="Models",
    yaxis_title="Accuracy",
    yaxis=dict(range=[0.9, 1.0]),  # zoom in: every reported accuracy exceeds 0.9
    template="plotly_white",
)
fig.show()
Source Code
---title: Experimentos Tesisauthor: Diego Cruz Aguilardate: 2023-07-01toc: truenumber-sections: trueexecute: echo: true cache: trueformat: html: code-fold: true code-tools: truejupyter: python3---{{< include mimic_one.qmd >}}# Resultados```{python}df_results = pd.DataFrame(results)df_results.to_csv("One-to-Many-Results.csv", index=False)df_ordenado = df_results.sort_values(by='accuracy', ascending=False)show(df_ordenado)```## Gráfico de líneas de precisión de los clasificadores```{python}# Crear gráfico de líneasfig = go.Figure()unique_models = df_results['model'].unique()# Trazar los datos para cada clasificadorfor name in unique_models:# Filtrar los datos para el clasificador actual model_data = df_results[df_results['model'] == name]# Agregar el trazo fig.add_trace( go.Scatter( x=model_data.index +1, # Índices ajustados como número de prueba y=model_data['accuracy'], # Precisión mode="lines+markers", # Mostrar líneas y puntos name=name, # Nombre del clasificador ) )# Configuración del diseñofig.update_layout( title="Comparación de Accuracy entre Clasificadores", xaxis_title="Número de Prueba", yaxis_title="Accuracy", template="plotly_white",)# Mostrar gráficofig.show()```## Comparacion con el estado del arte```{python}mejor_modelo = df_results.loc[df_results['accuracy'].idxmax()]models_accuracies = {"Pu et al": .9894,"Zhao et al static": .96,"Zhao et al mivement": .9073,"Aly et al": .935,"Wu et al": .921,"Zhang": .949,f"Proposed ({mejor_modelo['model']})": mejor_modelo['accuracy'],}# Prepare datamodels =list(models_accuracies.keys())accuracies =list(models_accuracies.values())# Create the scatter plotfig = go.Figure(data=[ go.Scatter( x=models, y=accuracies, mode='markers+lines', marker=dict(size=10, color=accuracies, colorscale='Viridis', showscale=True), line=dict(dash='solid'), name='Accuracy' )])# Customize layoutfig.update_layout( title="Model Accuracies", xaxis_title="Models", yaxis_title="Accuracy", yaxis=dict(range=[0.9, 1.0]), template="plotly_white")# 
Display the chartfig.show()```